d->addr_limit = USER_DS;
- spin_lock_init(&d->page_list_lock);
+ spin_lock_init(&d->page_alloc_lock);
INIT_LIST_HEAD(&d->page_list);
d->max_pages = d->tot_pages = 0;
if ( d != NULL )
{
wmb(); /* Domain pointer must be visible before updating refcnt. */
- spin_lock(&d->page_list_lock);
+ spin_lock(&d->page_alloc_lock);
if ( unlikely(d->tot_pages >= d->max_pages) )
{
DPRINTK("Over-allocation for domain %u: %u >= %u\n",
d->domain, d->tot_pages, d->max_pages);
- spin_unlock(&d->page_list_lock);
+ spin_unlock(&d->page_alloc_lock);
goto free_and_exit;
}
list_add_tail(&page->list, &d->page_list);
page->count_and_flags = PGC_allocated | 1;
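+ /* A domain's first page also pins a reference to the domain itself; the
+  * matching put_domain() happens in free_domain_page() when tot_pages
+  * falls back to zero. */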
if ( unlikely(d->tot_pages++ == 0) )
get_domain(d);
- spin_unlock(&d->page_list_lock);
+ spin_unlock(&d->page_alloc_lock);
}
return page;
int drop_dom_ref;
struct domain *d = page->u.domain;
- /* Deallocation of such pages is handled out of band. */
if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
- return;
+ {
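+ /* Xen-heap pages never appear on the domain's page_list, so there is
+  * nothing to unlink here; just adjust the xenheap_pages count. */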
+ spin_lock_recursive(&d->page_alloc_lock);
+ drop_dom_ref = (--d->xenheap_pages == 0);
+ spin_unlock_recursive(&d->page_alloc_lock);
+ }
+ else
+ {
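+ /* Record when and where this page was last used, so the allocator can
+  * tell whether a TLB flush is needed before the frame is reused. */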
+ page->tlbflush_timestamp = tlbflush_clock;
+ page->u.cpu_mask = 1 << d->processor;
+
+ /* NB. May recursively lock from domain_relinquish_memory(). */
+ spin_lock_recursive(&d->page_alloc_lock);
+ list_del(&page->list);
+ drop_dom_ref = (--d->tot_pages == 0);
+ spin_unlock_recursive(&d->page_alloc_lock);
- page->tlbflush_timestamp = tlbflush_clock;
- page->u.cpu_mask = 1 << d->processor;
+ page->count_and_flags = 0;
+
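+ /* The frame is now anonymous; return it to the global free list. */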
+ spin_lock_irqsave(&free_list_lock, flags);
+ list_add(&page->list, &free_list);
+ free_pfns++;
+ spin_unlock_irqrestore(&free_list_lock, flags);
+ }
- /* NB. May recursively lock from domain_relinquish_memory(). */
- spin_lock_recursive(&d->page_list_lock);
- list_del(&page->list);
- drop_dom_ref = (--d->tot_pages == 0);
- spin_unlock_recursive(&d->page_list_lock);
if ( drop_dom_ref )
put_domain(d);
-
- page->count_and_flags = 0;
-
- spin_lock_irqsave(&free_list_lock, flags);
- list_add(&page->list, &free_list);
- free_pfns++;
- spin_unlock_irqrestore(&free_list_lock, flags);
}
put_page_and_type(&frame_table[pagetable_val(d->mm.pagetable) >>
PAGE_SHIFT]);
+ /* Relinquish Xen-heap pages. Currently this can only be 'shared_info'. */
+ page = virt_to_page(d->shared_info);
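+ /* Clearing _PGC_allocated drops the reference taken at allocation time;
+  * the final put_page() then frees the frame. */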
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_and_flags) )
+ put_page(page);
+
/* Relinquish all pages on the domain's allocation list. */
- spin_lock_recursive(&d->page_list_lock); /* may enter free_domain_page() */
+ spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domain_page */
list_for_each_safe ( ent, tmp, &d->page_list )
{
page = list_entry(ent, struct pfn_info, list);
}
while ( unlikely(y != x) );
}
- spin_unlock_recursive(&d->page_list_lock);
+ spin_unlock_recursive(&d->page_alloc_lock);
}
}
/* Construct a frame-allocation list for the initial domain. */
- for ( pfn = (alloc_start>>PAGE_SHIFT);
- pfn < (alloc_end>>PAGE_SHIFT);
- pfn++ )
+ for ( mfn = (alloc_start>>PAGE_SHIFT);
+ mfn < (alloc_end>>PAGE_SHIFT);
+ mfn++ )
{
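+ /* Assign the frame to dom0: set the owner, clear any type info, and
+  * mark it allocated with an initial reference count of 1. */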
- page = &frame_table[pfn];
+ page = &frame_table[mfn];
page->u.domain = p;
page->type_and_flags = 0;
page->count_and_flags = PGC_allocated | 1;
si->mfn_list = vphysmap_start;
/* Write the phys->machine and machine->phys table entries. */
- for ( pfn = 0; pfn < p->tot_pages; pfn++ )
+ for ( mfn = (alloc_start>>PAGE_SHIFT);
+ mfn < (alloc_end>>PAGE_SHIFT);
+ mfn++ )
{
- mfn = (alloc_start >> PAGE_SHIFT) + pfn;
+ pfn = mfn - (alloc_start>>PAGE_SHIFT);
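+ /* pfn is dom0's guest-physical frame number, i.e. the machine frame's
+  * offset from the start of dom0's contiguous allocation. */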
((unsigned long *)vphysmap_start)[pfn] = mfn;
machine_to_phys_mapping[mfn] = pfn;
}
unsigned long flags;
struct domain *d;
s_time_t now = NOW();
+ struct list_head *ent;
+ struct pfn_info *page;
printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
(u32)(now>>32), (u32)now);
for_each_domain ( d )
{
- printk("Xen: DOM %u, CPU %d [has=%c] refcnt=%d nr_pages=%d\n",
+ printk("Xen: DOM %u, CPU %d [has=%c] refcnt=%d nr_pages=%d "
+ "xenheap_pages=%d\n",
d->domain, d->processor,
test_bit(DF_RUNNING, &d->flags) ? 'T':'F',
- atomic_read(&d->refcnt), d->tot_pages);
+ atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
+
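+ /* Dump the full page list only for very small domains, to avoid
+  * flooding the console. */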
+ if ( d->tot_pages < 10 )
+ {
+ list_for_each ( ent, &d->page_list )
+ {
+ page = list_entry(ent, struct pfn_info, list);
+ printk("Page %08x: caf=%08x, taf=%08x\n",
+ page_to_phys(page), page->count_and_flags,
+ page->type_and_flags);
+ }
+ }
+
+ page = virt_to_page(d->shared_info);
+ printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
+ page_to_phys(page), page->count_and_flags,
+ page->type_and_flags);
+
printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
d->shared_info->vcpu_data[0].evtchn_upcall_pending,
d->shared_info->vcpu_data[0].evtchn_upcall_mask);
*/
if ( d < e )
{
- spin_lock(&d->page_list_lock);
- spin_lock(&e->page_list_lock);
+ spin_lock(&d->page_alloc_lock);
+ spin_lock(&e->page_alloc_lock);
}
else
{
- spin_lock(&e->page_list_lock);
- spin_lock(&d->page_list_lock);
+ spin_lock(&e->page_alloc_lock);
+ spin_lock(&d->page_alloc_lock);
}
- /* A domain shouldn't have PGC_allocated pages when it is dying. */
+ /* A domain shouldn't have PGC_allocated pages when it is dying, and
+  * Xen-heap frames are never reassignable. */
- if ( unlikely(test_bit(DF_DYING, &e->flags)) )
+ if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
+ unlikely(IS_XEN_HEAP_FRAME(page)) )
{
okay = 0;
goto reassign_fail;
list_add_tail(&page->list, &e->page_list);
reassign_fail:
- spin_unlock(&d->page_list_lock);
- spin_unlock(&e->page_list_lock);
+ spin_unlock(&d->page_alloc_lock);
+ spin_unlock(&e->page_alloc_lock);
break;
case MMUEXT_RESET_SUBJECTDOM: